--- /dev/null
+/*
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Author: Leo Duran <leo.duran@amd.com>
+ * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
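+/*
+ * Parsing of the ACPI IVRS (I/O Virtualization Reporting Structure)
+ * table.  IVHD blocks describe IOMMU hardware and carry per-device
+ * setting overrides; IVMD blocks describe memory ranges that must be
+ * unity-mapped or excluded from translation for particular devices.
+ */
+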
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <asm/amd-iommu.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
+#include <asm/hvm/svm/amd-iommu-acpi.h>
+
+extern unsigned long amd_iommu_page_entries;
+extern unsigned short ivrs_bdf_entries;
+extern struct ivrs_mappings *ivrs_mappings;
+
+static struct amd_iommu * __init find_iommu_from_bdf_cap(
+ u16 bdf, u8 cap_offset)
+{
+ struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+ if ( iommu->bdf == bdf && iommu->cap_offset == cap_offset )
+ return iommu;
+
+ return NULL;
+}
+
+static void __init reserve_iommu_exclusion_range(struct amd_iommu *iommu,
+ unsigned long base, unsigned long limit)
+{
+ /* need to extend exclusion range? */
+ if ( iommu->exclusion_enable )
+ {
+ if ( iommu->exclusion_base < base )
+ base = iommu->exclusion_base;
+ if ( iommu->exclusion_limit > limit )
+ limit = iommu->exclusion_limit;
+ }
+
+ iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
+ iommu->exclusion_base = base;
+ iommu->exclusion_limit = limit;
+}
+
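+/*
+ * As reserve_iommu_exclusion_range(), but additionally marks the range
+ * as applying to all devices, rather than only to devices whose DTE
+ * has the allow-exclusion bit set.
+ */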
+static void __init reserve_iommu_exclusion_range_all(struct amd_iommu *iommu,
+ unsigned long base, unsigned long limit)
+{
+ reserve_iommu_exclusion_range(iommu, base, limit);
+ iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
+}
+
+static void __init reserve_unity_map_for_device(u16 bdf, unsigned long base,
+ unsigned long length, u8 iw, u8 ir)
+{
+ unsigned long old_top, new_top;
+
+ /* need to extend unity-mapped range? */
+ if ( ivrs_mappings[bdf].unity_map_enable )
+ {
+ old_top = ivrs_mappings[bdf].addr_range_start +
+ ivrs_mappings[bdf].addr_range_length;
+ new_top = base + length;
+ if ( old_top > new_top )
+ new_top = old_top;
+ if ( ivrs_mappings[bdf].addr_range_start < base )
+ base = ivrs_mappings[bdf].addr_range_start;
+ length = new_top - base;
+ }
+
+    /* extend r/w permissions and keep the aggregate */
+ if ( iw )
+ ivrs_mappings[bdf].write_permission = IOMMU_CONTROL_ENABLED;
+ if ( ir )
+ ivrs_mappings[bdf].read_permission = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[bdf].addr_range_start = base;
+ ivrs_mappings[bdf].addr_range_length = length;
+}
+
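+/*
+ * The register_exclusion_range_*() helpers below split an IVMD range
+ * at the top of the IOMMU virtual address space (max_page): the part
+ * below is reserved via unity mappings, and any remainder above is
+ * programmed as a hardware exclusion range.
+ */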
+static int __init register_exclusion_range_for_all_devices(
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ unsigned long range_top, iommu_top, length;
+ struct amd_iommu *iommu;
+ u16 bdf;
+
+    /* is part of the exclusion range inside the IOMMU virtual address space? */
+ /* note: 'limit' parameter is assumed to be page-aligned */
+ range_top = limit + PAGE_SIZE;
+ iommu_top = max_page * PAGE_SIZE;
+ if ( base < iommu_top )
+ {
+        if ( range_top > iommu_top )
+ range_top = iommu_top;
+ length = range_top - base;
+ /* reserve r/w unity-mapped page entries for devices */
+ /* note: these entries are part of the exclusion range */
+        for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ /* push 'base' just outside of virtual address space */
+ base = iommu_top;
+ }
+ /* register IOMMU exclusion range settings */
+    if ( limit >= iommu_top )
+ {
+        for_each_amd_iommu ( iommu )
+ reserve_iommu_exclusion_range_all(iommu, base, limit);
+ }
+
+ return 0;
+}
+
+static int __init register_exclusion_range_for_device(u16 bdf,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ unsigned long range_top, iommu_top, length;
+ struct amd_iommu *iommu;
+ u16 bus, devfn, req;
+
+ bus = bdf >> 8;
+ devfn = bdf & 0xFF;
+ iommu = find_iommu_for_device(bus, devfn);
+ if ( !iommu )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
+ return -ENODEV;
+ }
+ req = ivrs_mappings[bdf].dte_requestor_id;
+
+ /* note: 'limit' parameter is assumed to be page-aligned */
+ range_top = limit + PAGE_SIZE;
+ iommu_top = max_page * PAGE_SIZE;
+ if ( base < iommu_top )
+ {
+        if ( range_top > iommu_top )
+ range_top = iommu_top;
+ length = range_top - base;
+ /* reserve unity-mapped page entries for device */
+ /* note: these entries are part of the exclusion range */
+ reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ reserve_unity_map_for_device(req, base, length, iw, ir);
+
+ /* push 'base' just outside of virtual address space */
+ base = iommu_top;
+ }
+
+ /* register IOMMU exclusion range settings for device */
+ if ( limit >= iommu_top )
+ {
+ reserve_iommu_exclusion_range(iommu, base, limit);
+ ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
+ }
+
+ return 0;
+}
+
+static int __init register_exclusion_range_for_iommu_devices(
+ struct amd_iommu *iommu,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ unsigned long range_top, iommu_top, length;
+ u16 bus, devfn, bdf, req;
+
+    /* is part of the exclusion range inside the IOMMU virtual address space? */
+ /* note: 'limit' parameter is assumed to be page-aligned */
+ range_top = limit + PAGE_SIZE;
+ iommu_top = max_page * PAGE_SIZE;
+ if ( base < iommu_top )
+ {
+        if ( range_top > iommu_top )
+ range_top = iommu_top;
+ length = range_top - base;
+ /* reserve r/w unity-mapped page entries for devices */
+ /* note: these entries are part of the exclusion range */
+ for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ {
+ bus = bdf >> 8;
+ devfn = bdf & 0xFF;
+ if ( iommu == find_iommu_for_device(bus, devfn) )
+ {
+ reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ req = ivrs_mappings[bdf].dte_requestor_id;
+ reserve_unity_map_for_device(req, base, length, iw, ir);
+ }
+ }
+
+ /* push 'base' just outside of virtual address space */
+ base = iommu_top;
+ }
+
+ /* register IOMMU exclusion range settings */
+    if ( limit >= iommu_top )
+ reserve_iommu_exclusion_range_all(iommu, base, limit);
+ return 0;
+}
+
+static int __init parse_ivmd_device_select(
+ struct acpi_ivmd_block_header *ivmd_block,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ u16 bdf;
+
+ bdf = ivmd_block->header.dev_id;
+    if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
+ return -ENODEV;
+ }
+
+ return register_exclusion_range_for_device(bdf, base, limit, iw, ir);
+}
+
+static int __init parse_ivmd_device_range(
+ struct acpi_ivmd_block_header *ivmd_block,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ u16 first_bdf, last_bdf, bdf;
+ int error;
+
+ first_bdf = ivmd_block->header.dev_id;
+    if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: "
+ "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
+ return -ENODEV;
+ }
+
+ last_bdf = ivmd_block->last_dev_id;
+    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: "
+ "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
+ return -ENODEV;
+ }
+
+ dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+
+ for ( bdf = first_bdf, error = 0;
+ bdf <= last_bdf && !error; ++bdf )
+ {
+ error = register_exclusion_range_for_device(
+ bdf, base, limit, iw, ir);
+ }
+
+ return error;
+}
+
+static int __init parse_ivmd_device_iommu(
+ struct acpi_ivmd_block_header *ivmd_block,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ struct amd_iommu *iommu;
+
+ /* find target IOMMU */
+ iommu = find_iommu_from_bdf_cap(ivmd_block->header.dev_id,
+ ivmd_block->cap_offset);
+ if ( !iommu )
+ {
+ dprintk(XENLOG_ERR,
+ "IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+ ivmd_block->header.dev_id, ivmd_block->cap_offset);
+ return -ENODEV;
+ }
+
+ return register_exclusion_range_for_iommu_devices(
+ iommu, base, limit, iw, ir);
+}
+
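+/*
+ * Decode the IVMD flags field: an exclusion-range block implies full
+ * r/w unity mapping, while a unity-mapping block carries separate
+ * read/write permission bits; then dispatch on the block type.
+ */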
+static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
+{
+ unsigned long start_addr, mem_length, base, limit;
+ u8 iw, ir;
+
+    if ( ivmd_block->header.length <
+         sizeof(struct acpi_ivmd_block_header) )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
+ return -ENODEV;
+ }
+
+ start_addr = (unsigned long)ivmd_block->start_addr;
+ mem_length = (unsigned long)ivmd_block->mem_length;
+ base = start_addr & PAGE_MASK;
+ limit = (start_addr + mem_length - 1) & PAGE_MASK;
+
+ dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
+ ivmd_block->header.type);
+ dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
+ dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
+
+ if ( get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
+ AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT) )
+ iw = ir = IOMMU_CONTROL_ENABLED;
+ else if ( get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_UNITY_MAPPING_MASK,
+ AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT) )
+ {
+ iw = get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_IW_PERMISSION_MASK,
+ AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT);
+ ir = get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_IR_PERMISSION_MASK,
+ AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT);
+ }
+ else
+ {
+        dprintk(XENLOG_ERR, "IVMD Error: Invalid Flag Field!\n");
+ return -ENODEV;
+ }
+
+    switch ( ivmd_block->header.type )
+ {
+ case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
+ return register_exclusion_range_for_all_devices(
+ base, limit, iw, ir);
+
+ case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
+ return parse_ivmd_device_select(ivmd_block,
+ base, limit, iw, ir);
+
+ case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
+ return parse_ivmd_device_range(ivmd_block,
+ base, limit, iw, ir);
+
+ case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
+ return parse_ivmd_device_iommu(ivmd_block,
+ base, limit, iw, ir);
+
+ default:
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
+ return -ENODEV;
+ }
+}
+
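+/*
+ * Each parse_ivhd_device_* helper below validates a single device
+ * entry and returns the number of bytes it occupies, or 0 on error
+ * (which aborts the enclosing IVHD block parse).
+ */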
+static u16 __init parse_ivhd_device_padding(u16 pad_length,
+ u16 header_length, u16 block_length)
+{
+ if ( header_length < (block_length + pad_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ return pad_length;
+}
+
+static u16 __init parse_ivhd_device_select(
+ union acpi_ivhd_device *ivhd_device)
+{
+ u16 bdf;
+
+ bdf = ivhd_device->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ return 0;
+ }
+
+ /* override flags for device */
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+
+ return sizeof(struct acpi_ivhd_device_header);
+}
+
+static u16 __init parse_ivhd_device_range(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, first_bdf, last_bdf, bdf;
+ u8 sys_mgt;
+
+ dev_length = sizeof(struct acpi_ivhd_device_range);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+    if ( ivhd_device->range.trailer.type !=
+         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+    {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: End_Type 0x%x\n",
+ ivhd_device->range.trailer.type);
+ return 0;
+ }
+
+ first_bdf = ivhd_device->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ return 0;
+ }
+
+ last_bdf = ivhd_device->range.trailer.dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+
+ /* override flags for range of devices */
+ sys_mgt = get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+ ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_alias(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, alias_id, bdf;
+
+ dev_length = sizeof(struct acpi_ivhd_device_alias);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ bdf = ivhd_device->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ return 0;
+ }
+
+ alias_id = ivhd_device->alias.dev_id;
+ if ( alias_id >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Alias Dev_Id 0x%x\n", alias_id);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+
+ /* override requestor_id and flags for device */
+ ivrs_mappings[bdf].dte_requestor_id = alias_id;
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ ivrs_mappings[alias_id].dte_sys_mgt_enable =
+ ivrs_mappings[bdf].dte_sys_mgt_enable;
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_alias_range(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
+ u8 sys_mgt;
+
+ dev_length = sizeof(struct acpi_ivhd_device_alias_range);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ if ( ivhd_device->alias_range.trailer.type !=
+ AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: End_Type 0x%x\n",
+ ivhd_device->alias_range.trailer.type);
+ return 0;
+ }
+
+ first_bdf = ivhd_device->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR,"IVHD Error: "
+ "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ return 0;
+ }
+
+ last_bdf = ivhd_device->alias_range.trailer.dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ return 0;
+ }
+
+ alias_id = ivhd_device->alias_range.alias.dev_id;
+ if ( alias_id >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Alias Dev_Id 0x%x\n", alias_id);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+ dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+
+ /* override requestor_id and flags for range of devices */
+ sys_mgt = get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+ {
+ ivrs_mappings[bdf].dte_requestor_id = alias_id;
+ ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+ }
+ ivrs_mappings[alias_id].dte_sys_mgt_enable = sys_mgt;
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_extended(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, bdf;
+
+ dev_length = sizeof(struct acpi_ivhd_device_extended);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ bdf = ivhd_device->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ return 0;
+ }
+
+ /* override flags for device */
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_extended_range(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, first_bdf, last_bdf, bdf;
+ u8 sys_mgt;
+
+ dev_length = sizeof(struct acpi_ivhd_device_extended_range);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ if ( ivhd_device->extended_range.trailer.type !=
+ AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: End_Type 0x%x\n",
+ ivhd_device->extended_range.trailer.type);
+ return 0;
+ }
+
+ first_bdf = ivhd_device->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ return 0;
+ }
+
+ last_bdf = ivhd_device->extended_range.trailer.dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+
+ /* override flags for range of devices */
+ sys_mgt = get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+ ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+
+ return dev_length;
+}
+
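+/*
+ * An IVHD block identifies one IOMMU (by BDF and capability offset),
+ * carries its feature flags, and is followed by a variable-length
+ * list of device entries overriding the per-device defaults.
+ */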
+static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
+{
+ union acpi_ivhd_device *ivhd_device;
+ u16 block_length, dev_length;
+ struct amd_iommu *iommu;
+
+ if ( ivhd_block->header.length <
+ sizeof(struct acpi_ivhd_block_header) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
+ return -ENODEV;
+ }
+
+ iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id,
+ ivhd_block->cap_offset);
+ if ( !iommu )
+ {
+ dprintk(XENLOG_ERR,
+ "IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+ ivhd_block->header.dev_id, ivhd_block->cap_offset);
+ return -ENODEV;
+ }
+
+ dprintk(XENLOG_INFO, "IVHD Block:\n");
+ dprintk(XENLOG_INFO, " Cap_Offset 0x%x\n",
+ ivhd_block->cap_offset);
+ dprintk(XENLOG_INFO, " MMIO_BAR_Phys 0x%lx\n",
+ (unsigned long)ivhd_block->mmio_base);
+ dprintk(XENLOG_INFO, " PCI_Segment 0x%x\n",
+ ivhd_block->pci_segment);
+ dprintk(XENLOG_INFO, " IOMMU_Info 0x%x\n",
+ ivhd_block->iommu_info);
+
+ /* override IOMMU support flags */
+ iommu->coherent = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_COHERENT_MASK,
+ AMD_IOMMU_ACPI_COHERENT_SHIFT);
+ iommu->iotlb_support = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
+ AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
+ iommu->isochronous = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_ISOC_MASK,
+ AMD_IOMMU_ACPI_ISOC_SHIFT);
+ iommu->res_pass_pw = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
+ AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
+ iommu->pass_pw = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_PASS_PW_MASK,
+ AMD_IOMMU_ACPI_PASS_PW_SHIFT);
+ iommu->ht_tunnel_enable = get_field_from_byte(
+ ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
+ AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
+
+ /* parse Device Entries */
+ block_length = sizeof(struct acpi_ivhd_block_header);
+    while ( ivhd_block->header.length >=
+ (block_length + sizeof(struct acpi_ivhd_device_header)) )
+ {
+ ivhd_device = (union acpi_ivhd_device *)
+ ((u8 *)ivhd_block + block_length);
+
+ dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
+ dprintk(XENLOG_INFO, " Type 0x%x\n",
+ ivhd_device->header.type);
+ dprintk(XENLOG_INFO, " Dev_Id 0x%x\n",
+ ivhd_device->header.dev_id);
+ dprintk(XENLOG_INFO, " Flags 0x%x\n",
+ ivhd_device->header.flags);
+
+        switch ( ivhd_device->header.type )
+ {
+ case AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD:
+ dev_length = parse_ivhd_device_padding(
+ sizeof(u32),
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_U64_PAD:
+ dev_length = parse_ivhd_device_padding(
+ sizeof(u64),
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_SELECT:
+ dev_length = parse_ivhd_device_select(ivhd_device);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START:
+ dev_length = parse_ivhd_device_range(ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT:
+ dev_length = parse_ivhd_device_alias(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE:
+ dev_length = parse_ivhd_device_alias_range(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT:
+ dev_length = parse_ivhd_device_extended(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE:
+ dev_length = parse_ivhd_device_extended_range(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ default:
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device Type!\n");
+ dev_length = 0;
+ break;
+ }
+
+ block_length += dev_length;
+ if ( !dev_length )
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init parse_ivrs_block(struct acpi_ivrs_block_header *ivrs_block)
+{
+ struct acpi_ivhd_block_header *ivhd_block;
+ struct acpi_ivmd_block_header *ivmd_block;
+
+    switch ( ivrs_block->type )
+ {
+ case AMD_IOMMU_ACPI_IVHD_TYPE:
+ ivhd_block = (struct acpi_ivhd_block_header *)ivrs_block;
+ return parse_ivhd_block(ivhd_block);
+
+ case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
+ case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
+ case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
+ case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
+ ivmd_block = (struct acpi_ivmd_block_header *)ivrs_block;
+ return parse_ivmd_block(ivmd_block);
+
+ default:
+ dprintk(XENLOG_ERR, "IVRS Error: Invalid Block Type!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void __init dump_acpi_table_header(struct acpi_table_header *table)
+{
+ int i;
+
+ printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
+ printk(XENLOG_INFO " Signature ");
+ for ( i = 0; i < ACPI_NAME_SIZE; ++i )
+ printk("%c", table->signature[i]);
+ printk("\n");
+
+ printk(" Length 0x%x\n", table->length);
+ printk(" Revision 0x%x\n", table->revision);
+ printk(" CheckSum 0x%x\n", table->checksum);
+
+ printk(" OEM_Id ");
+ for ( i = 0; i < ACPI_OEM_ID_SIZE; ++i )
+ printk("%c", table->oem_id[i]);
+ printk("\n");
+
+ printk(" OEM_Table_Id ");
+ for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; ++i )
+ printk("%c", table->oem_table_id[i]);
+ printk("\n");
+
+ printk(" OEM_Revision 0x%x\n", table->oem_revision);
+
+ printk(" Creator_Id ");
+ for ( i = 0; i < ACPI_NAME_SIZE; ++i )
+ printk("%c", table->asl_compiler_id[i]);
+ printk("\n");
+
+ printk(" Creator_Revision 0x%x\n",
+ table->asl_compiler_revision);
+}
+
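+/*
+ * Top-level IVRS parser, invoked via acpi_table_parse(): validate the
+ * table checksum, then walk the list of IVRS blocks, dispatching each
+ * one by type.
+ */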
+int __init parse_ivrs_table(unsigned long phys_addr,
+ unsigned long size)
+{
+ struct acpi_ivrs_block_header *ivrs_block;
+ unsigned long length, i;
+ u8 checksum, *raw_table;
+ int error = 0;
+ struct acpi_table_header *table =
+ (struct acpi_table_header *) __acpi_map_table(phys_addr, size);
+
+ BUG_ON(!table);
+
+#if 0
+ dump_acpi_table_header(table);
+#endif
+
+ /* validate checksum: sum of entire table == 0 */
+ checksum = 0;
+ raw_table = (u8 *)table;
+ for ( i = 0; i < table->length; ++i )
+ checksum += raw_table[i];
+ if ( checksum )
+ {
+ dprintk(XENLOG_ERR, "IVRS Error: "
+ "Invalid Checksum 0x%x\n", checksum);
+ return -ENODEV;
+ }
+
+ /* parse IVRS blocks */
+ length = sizeof(struct acpi_ivrs_table_header);
+    while ( error == 0 && table->length >
+ (length + sizeof(struct acpi_ivrs_block_header)) )
+ {
+ ivrs_block = (struct acpi_ivrs_block_header *)
+ ((u8 *)table + length);
+
+ dprintk(XENLOG_INFO, "IVRS Block:\n");
+ dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
+ dprintk(XENLOG_INFO, " Flags 0x%x\n", ivrs_block->flags);
+ dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
+ dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
+
+        if ( table->length >= (length + ivrs_block->length) )
+ error = parse_ivrs_block(ivrs_block);
+ else
+ {
+ dprintk(XENLOG_ERR, "IVRS Error: "
+ "Table Length Exceeded: 0x%x -> 0x%lx\n",
+ table->length,
+ (length + ivrs_block->length));
+ return -ENODEV;
+ }
+ length += ivrs_block->length;
+ }
+
+ return error;
+}
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
+#include <asm/hvm/svm/amd-iommu-acpi.h>
#include <xen/sched.h>
#include <asm/mm.h>
#include "../pci-direct.h"
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
int nr_amd_iommus = 0;
+unsigned short ivrs_bdf_entries = 0;
+struct ivrs_mappings *ivrs_mappings = NULL;
+
/* will set if amd-iommu HW is found */
int amd_iommu_enabled = 0;
deallocate_iommu_resources(iommu);
xfree(iommu);
}
-}
-static int requestor_id_from_bdf(int bdf)
-{
- /* HACK - HACK */
- /* account for possible 'aliasing' by parent device */
- return bdf;
+ if ( ivrs_mappings )
+ {
+ xfree(ivrs_mappings);
+ ivrs_mappings = NULL;
+ }
}
static int __init allocate_iommu_table_struct(struct table_struct *table,
{
struct amd_iommu *iommu;
unsigned long flags;
+ u16 bdf;
for_each_amd_iommu ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
+ /* assign default IOMMU values */
+ iommu->coherent = IOMMU_CONTROL_ENABLED;
+ iommu->isochronous = IOMMU_CONTROL_ENABLED;
+ iommu->res_pass_pw = IOMMU_CONTROL_ENABLED;
+ iommu->pass_pw = IOMMU_CONTROL_ENABLED;
+ iommu->ht_tunnel_enable = iommu->ht_tunnel_support ?
+ IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED;
+ iommu->exclusion_enable = IOMMU_CONTROL_DISABLED;
+ iommu->exclusion_allow_all = IOMMU_CONTROL_DISABLED;
+
+        /* register IOMMU data structures in MMIO space */
if ( map_iommu_mmio_region(iommu) != 0 )
goto error_out;
register_iommu_dev_table_in_mmio_space(iommu);
register_iommu_cmd_buffer_in_mmio_space(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+ /* assign default values for device entries */
+ for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ {
+ ivrs_mappings[bdf].dte_requestor_id = bdf;
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
+ ivrs_mappings[bdf].dte_allow_exclusion =
+ IOMMU_CONTROL_DISABLED;
+ ivrs_mappings[bdf].unity_map_enable =
+ IOMMU_CONTROL_DISABLED;
+ }
+
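+    /* settings in the IVRS table, if found, override the defaults above */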
+ if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) != 0 )
+ dprintk(XENLOG_INFO, "AMD IOMMU: Did not find IVRS table!\n");
+
+ for_each_amd_iommu ( iommu )
+ {
+ spin_lock_irqsave(&iommu->lock, flags);
/* enable IOMMU translation services */
enable_iommu(iommu);
nr_amd_iommus++;
-
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
void amd_iommu_setup_domain_device(
- struct domain *domain, struct amd_iommu *iommu, int requestor_id)
+ struct domain *domain, struct amd_iommu *iommu, int bdf)
{
void *dte;
u64 root_ptr;
unsigned long flags;
+ int req_id;
+ u8 sys_mgt, dev_ex;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- BUG_ON( !hd->root_table||!hd->paging_mode );
+ BUG_ON( !hd->root_table || !hd->paging_mode );
root_ptr = (u64)virt_to_maddr(hd->root_table);
+ /* get device-table entry */
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
dte = iommu->dev_table.buffer +
- (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
- amd_iommu_set_dev_table_entry(
- (u32 *)dte,
- root_ptr, hd->domain_id, hd->paging_mode);
- invalidate_dev_table_entry(iommu, requestor_id);
+ /* bind DTE to domain page-tables */
+ sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
+ dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
+ amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
+ req_id, sys_mgt, dev_ex, hd->paging_mode);
+
+ invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
"root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
- requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
+ req_id, root_ptr, hd->domain_id, hd->paging_mode);
spin_unlock_irqrestore(&iommu->lock, flags);
}
struct pci_dev *pdev;
int bus, dev, func;
u32 l;
- int req_id, bdf;
+ int bdf;
for ( bus = 0; bus < 256; bus++ )
{
list_add_tail(&pdev->list, &hd->pdev_list);
bdf = (bus << 8) | pdev->devfn;
- req_id = requestor_id_from_bdf(bdf);
- iommu = find_iommu_for_device(bus, pdev->devfn);
+ /* supported device? */
+ iommu = (bdf < ivrs_bdf_entries) ?
+ find_iommu_for_device(bus, pdev->devfn) : NULL;
if ( iommu )
- amd_iommu_setup_domain_device(dom0, iommu, req_id);
+ amd_iommu_setup_domain_device(dom0, iommu, bdf);
}
}
}
int amd_iommu_detect(void)
{
unsigned long i;
+ int last_bus;
+ struct amd_iommu *iommu;
if ( !enable_amd_iommu )
{
printk("AMD IOMMU: Not found!\n");
return 0;
}
+ else
+ {
+ /* allocate 'ivrs mappings' table */
+        /* note: the table has entries to accommodate all IOMMUs */
+ last_bus = 0;
+        for_each_amd_iommu ( iommu )
+            if ( iommu->last_downstream_bus > last_bus )
+ last_bus = iommu->last_downstream_bus;
+
+ ivrs_bdf_entries = (last_bus + 1) *
+ IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
+        ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
+
+ if ( !ivrs_mappings )
+ {
+ dprintk(XENLOG_ERR, "AMD IOMMU:"
+ " Error allocating IVRS DevMappings table\n");
+ goto error_out;
+ }
+ memset(ivrs_mappings, 0,
+ ivrs_bdf_entries * sizeof(struct ivrs_mappings));
+ }
if ( amd_iommu_init() != 0 )
{
}
static void amd_iommu_disable_domain_device(
- struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
+ struct domain *domain, struct amd_iommu *iommu, int bdf)
{
void *dte;
unsigned long flags;
+ int req_id;
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
dte = iommu->dev_table.buffer +
- (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
- invalidate_dev_table_entry(iommu, requestor_id);
+ invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
" domain_id:%d, paging_mode:%d\n",
- requestor_id, domain_hvm_iommu(domain)->domain_id,
+ req_id, domain_hvm_iommu(domain)->domain_id,
domain_hvm_iommu(domain)->paging_mode);
spin_unlock_irqrestore(&iommu->lock, flags);
}
struct hvm_iommu *target_hd = domain_hvm_iommu(target);
struct pci_dev *pdev;
struct amd_iommu *iommu;
- int req_id, bdf;
+ int bdf;
unsigned long flags;
for_each_pdev( source, pdev )
pdev->devfn = devfn;
bdf = (bus << 8) | devfn;
- req_id = requestor_id_from_bdf(bdf);
- iommu = find_iommu_for_device(bus, devfn);
+ /* supported device? */
+ iommu = (bdf < ivrs_bdf_entries) ?
+ find_iommu_for_device(bus, pdev->devfn) : NULL;
if ( iommu )
{
- amd_iommu_disable_domain_device(source, iommu, req_id);
+ amd_iommu_disable_domain_device(source, iommu, bdf);
/* Move pci device from the source domain to target domain. */
spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
- amd_iommu_setup_domain_device(target, iommu, req_id);
+ amd_iommu_setup_domain_device(target, iommu, bdf);
gdprintk(XENLOG_INFO ,
"AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
+ int bdf = (bus << 8) | devfn;
+ int req_id;
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
+
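+    /* reserve unity mappings reported by the firmware for this requestor ID */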
+    if ( ivrs_mappings[req_id].unity_map_enable )
+ {
+ amd_iommu_reserve_domain_unity_map(d,
+ ivrs_mappings[req_id].addr_range_start,
+ ivrs_mappings[req_id].addr_range_length,
+ ivrs_mappings[req_id].write_permission,
+ ivrs_mappings[req_id].read_permission);
+ }
+
pdev_flr(bus, devfn);
return reassign_device(dom0, d, bus, devfn);
}